//printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info);
//printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info);
- // TG: Real HACK FIXME.
- // This is currently necessary because when a new domain is started,
- // the context_switch function of xen/common/schedule.c(__enter_scheduler)
- // never returns. Therefore, the lock must be released.
- // schedule_tail is only called when a domain is started.
- spin_unlock_irq(&schedule_data[current->processor].schedule_lock);
+ // This is necessary because when a new domain is started, our
+ // implementation of context_switch() does not return (switch_to() has
+ // special and peculiar behaviour in this case).
+ context_switch_done();
/* rr7 will be postponed to the last point when resuming back to the guest */
if(VMX_DOMAIN(current)){
}
}
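
Why schedule_tail() must make this call: a newly created vCPU is resumed by switch_to() directly at schedule_tail(), so on that path the context_switch() invocation in the common scheduler never returns and the schedule_lock it acquired is still held. A rough sketch of the control flow, illustrative only and not the verbatim ia64 code (switch_to() internals elided):

    void context_switch(struct vcpu *prev, struct vcpu *next)
    {
        /* ... save prev's state ... */
        switch_to(prev, next);  /* a first-run 'next' resumes in
                                 * schedule_tail(), never back here */
        /* Reached only when 'next' has run before; the fallback in
         * the common scheduler then calls context_switch_done(). */
    }
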
-void context_switch_finalise(struct vcpu *next)
-{
- /* nothing to do */
-}
-
void continue_running(struct vcpu *same)
{
/* nothing to do */
struct percpu_ctxt {
struct vcpu *curr_vcpu;
- unsigned int context_not_finalised;
unsigned int dirty_segment_mask;
} __cacheline_aligned;
static struct percpu_ctxt percpu_ctxt[NR_CPUS];
!is_idle_domain(next->domain) )
{
__context_switch();
- percpu_ctxt[cpu].context_not_finalised = 1;
- }
-}
-
-void context_switch_finalise(struct vcpu *next)
-{
- unsigned int cpu = smp_processor_id();
- ASSERT(local_irq_is_enabled());
-
- if ( percpu_ctxt[cpu].context_not_finalised )
- {
- percpu_ctxt[cpu].context_not_finalised = 0;
-
- BUG_ON(percpu_ctxt[cpu].curr_vcpu != next);
+ context_switch_done();
+ ASSERT(local_irq_is_enabled());
if ( VMX_DOMAIN(next) )
{
vmx_load_msrs(next);
}
}
+ else
+ {
+ context_switch_done();
+ }
schedule_tail(next);
BUG();
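
Pieced together, the x86 context_switch() now has roughly this shape (a simplified reconstruction: the interrupt-disabled bracket around __context_switch() and the segment/TLB details are elided). schedule_tail() on x86 resets the stack and never returns, so both branches call context_switch_done() themselves rather than relying on the fallback in __enter_scheduler():

    void context_switch(struct vcpu *prev, struct vcpu *next)
    {
        unsigned int cpu = smp_processor_id();

        set_current(next);

        if ( (percpu_ctxt[cpu].curr_vcpu != next) &&
             !is_idle_domain(next->domain) )
        {
            __context_switch();

            /* Releases schedule_lock and re-enables interrupts. */
            context_switch_done();
            ASSERT(local_irq_is_enabled());

            /* Some state can only be loaded with interrupts enabled. */
            if ( VMX_DOMAIN(next) )
                vmx_load_msrs(next);
        }
        else
        {
            /* Lazy switch (e.g. to the idle domain): register state
             * stays in place, but the lock must still be dropped. */
            context_switch_done();
        }

        schedule_tail(next);  /* resets the stack; does not return */
        BUG();
    }
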
prev->domain->domain_id, prev->vcpu_id,
next->domain->domain_id, next->vcpu_id);
+ schedule_data[cpu].context_switch_in_progress = 1;
context_switch(prev, next);
+ if ( schedule_data[cpu].context_switch_in_progress )
+ context_switch_done();
+}
+
+void context_switch_done(void)
+{
+ unsigned int cpu = smp_processor_id();
+ ASSERT(schedule_data[cpu].context_switch_in_progress);
spin_unlock_irq(&schedule_data[cpu].schedule_lock);
-
- context_switch_finalise(next);
+ schedule_data[cpu].context_switch_in_progress = 0;
}
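
The flag makes the two call sites mutually exclusive: exactly one context_switch_done() runs per switch. An architecture calls it explicitly at its own safe point (as ia64 and x86 do above), or, if its context_switch() restores everything with interrupts disabled and simply returns, the fallback in __enter_scheduler() fires instead. A hypothetical minimal port, shown only to illustrate the returning case:

    /* Hypothetical port -- not an existing Xen architecture. */
    void context_switch(struct vcpu *prev, struct vcpu *next)
    {
        set_current(next);
        load_full_state(next);  /* hypothetical arch helper */
        /* Returning with context_switch_in_progress still set causes
         * __enter_scheduler() to call context_switch_done() for us. */
    }
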
/* No locking needed -- pointer comparison is safe :-) */
struct schedule_data {
spinlock_t schedule_lock; /* spinlock protecting curr */
- struct vcpu *curr; /* current task */
- struct vcpu *idle; /* idle task for this cpu */
+ struct vcpu *curr; /* current task */
+ struct vcpu *idle; /* idle task for this cpu */
void *sched_priv;
struct ac_timer s_timer; /* scheduling timer */
unsigned long tick; /* current periodic 'tick' */
+ int context_switch_in_progress;
#ifdef BUCKETS
u32 hist[BUCKETS]; /* for scheduler latency histogram */
#endif
struct vcpu *next);
/*
- * On some architectures (notably x86) it is not possible to entirely load
- * @next's context with interrupts disabled. These may implement a function to
- * finalise loading the new context after interrupts are re-enabled. This
- * function is not given @prev and is not permitted to access it.
+ * If context_switch() does not return to the caller, or you need to perform
+ * some aspects of state restoration with interrupts enabled, then you must
+ * call context_switch_done() at a suitable safe point.
+ *
+ * As when returning from context_switch(), the caller must ensure that the
+ * local CPU is no longer running in the previous VCPU's context, and that the
+ * context is saved to memory. Alternatively, if implementing lazy context
+ * switching, ensure that invoking sync_vcpu_execstate() will switch and
+ * commit the previous VCPU's state.
*/
-extern void context_switch_finalise(
- struct vcpu *next);
+extern void context_switch_done(void);
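
To illustrate the lazy-switching alternative in the comment above: the x86 port leaves the previous vCPU's register state live when switching to the idle domain (the else branch earlier) and commits it only on demand. Roughly, simplified from the x86 code of this era:

    void sync_vcpu_execstate(struct vcpu *v)
    {
        /* If v's state is still live on this CPU, write it back now. */
        if ( percpu_ctxt[smp_processor_id()].curr_vcpu == v )
            __sync_lazy_execstate();

        /* Remote CPUs commit v's state via a flush IPI (elided). */
    }
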
/* Called by the scheduler to continue running the current VCPU. */
extern void continue_running(